File Loader Script

The following is a file Loader script that tweaks the existing sklearn.datasets load_files function to recursively load nested directory structure.

The following script expects scikit-learn to be installed. If it is not already installed, run: pip install scikit-learn (or sudo pip install scikit-learn).

Load text files with categories as subfolder names. Individual samples are assumed to be files stored in a hierarchical folder structure such as the following:

    container_folder/
        Intermediate_folder1/......
            category_1_folder/
                file_1.txt
                file_2.txt
                ...
                file_42.txt
        Intermediate_folder2/......
            category_2_folder/
                file_43.txt
                file_44.txt
            ...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...

Parameters
----------
container_path : string or unicode
    Path to the main folder holding one subfolder per category
level: integer
    Specify the depth of the directory structure that you wish to store as your category labels

description: string or unicode, optional (default=None)
    A paragraph describing the characteristic of the dataset: its source,
    reference, etc.
categories : A collection of strings or None, optional (default=None)
    If None (default), load all the categories.
    If not None, list of category names to load (other categories ignored).
exclude: A collection of strings or None, optional (default=None)
    If None (default), load all the categories.
    If not None, category names listed in exclude are ignored.
OneVsAll: A list containing a single string, or None (default=None)
    Creates a one-vs-all classifier for the named category.
    E.g. OneVsAll=['Agro'] will create a binary classifier where documents
    are classified into two classes, i.e. Agro vs. Non-Agro.
    Note: currently only supported for level 1.
Cat: Often given together with Categories parameter.
    If cat=1, then categories parameter takes a list of level 1 classes 
    cat=2, then categories parameter takes a list of level 2 classes 
load_content : boolean, optional (default=True)
    Whether to load or not the content of the different files. If
    true a 'data' attribute containing the text information is present
    in the data structure returned. If not, a filenames attribute
    gives the path to the files.
encoding : string or None (default is None)
    If None, do not try to decode the content of the files (e.g. for
    images or other non-text content).
    If not None, encoding to use to decode text files to Unicode if
    load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
    Instruction on what to do if a byte sequence is given to analyze that
    contains characters not of the given `encoding`. Passed as keyword
    argument 'errors' to bytes.decode.
shuffle : bool, optional (default=False)
    Whether or not to shuffle the data: might be important for models that
    make the assumption that the samples are independent and identically
    distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
    If int, random_state is the seed used by the random number generator;
    If RandomState instance, random_state is the random number generator;
    If None, the random number generator is the RandomState instance used
    by `np.random`.
Returns
-------
data : Bunch
    Dictionary-like object, the interesting attributes are: either
    data, the raw text data to learn, or 'filenames', the files
    holding it, 'target', the classification labels (integer index),
    'target_names', the meaning of the labels, and 'DESCR', the full
    description of the dataset.

In [120]:
import os
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
import glob
import numpy as np

from sklearn.utils import check_random_state


class MyError(Exception):
    """Error raised by load_files for invalid arguments (e.g. a bad `level`)."""


class Bunch(dict):
    """Container object exposing dict keys as attributes (b.key == b['key'])."""

    def __init__(self, **entries):
        super().__init__(entries)

    def __setattr__(self, name, value):
        # Attribute writes are routed straight into the dict storage.
        self[name] = value

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to keys.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __getstate__(self):
        # Pickle support: the instance __dict__ stays empty because
        # __setattr__ stores everything as dict items.
        return self.__dict__


def load_files(container_path, level, description=None, OneVsAll=None,
               categories=None, load_content=True, cat=0, exclude=None,
               encoding=None, shuffle=False, decode_error='strict',
               random_state=0):
    """Load text files whose labels come from (possibly nested) folder names.

    Parameters
    ----------
    container_path : str
        Path to the main folder holding one subfolder per category.
    level : int
        Depth of the directory level used as category labels (1 or 2).
    description : str, optional
        Free-text description stored in the returned Bunch as DESCR.
    OneVsAll : list of one str, optional
        If given (level 1 only), build a binary problem: the named
        category (label 0) versus everything else ('Non-<name>', label 1).
    categories : collection of str, optional
        If not None, only load these categories (see `cat` for level 2).
    load_content : bool, default True
        Whether to read file contents into the 'data' attribute.
    cat : int, default 0
        With level=2: cat=1 means `categories` lists level-1 names,
        matched against the part of the leaf name before the first '.';
        cat=2 means `categories` lists level-2 (leaf) names.
    exclude : collection of str, optional
        Category names to skip.
    encoding : str, optional
        If given, decode file bytes with this encoding; otherwise the
        'data' entries are bytes.
    shuffle : bool, default False
        Shuffle samples using `random_state`.
    decode_error : {'strict', 'ignore', 'replace'}
        Passed to bytes.decode as the `errors` argument.
    random_state : int, RandomState instance or None, default 0
        Seed or generator used when shuffling.

    Returns
    -------
    Bunch
        With 'filenames', 'target', 'target_names', 'DESCR' and, when
        load_content is True, 'data'.

    Raises
    ------
    MyError
        If `level` is not 1 or 2, or OneVsAll is used incorrectly.
    """
    target = []
    target_names = []
    filenames = []

    # Collect (parent, leaf) pairs so each level-2 leaf stays paired with
    # the intermediate folder that actually contains it. (The previous
    # prefix-matching lookup could leave folder_path unbound or stale when
    # a leaf name did not start with '<parent>.'.)
    if level == 1:
        entries = [(None, f) for f in sorted(listdir(container_path))
                   if isdir(join(container_path, f))]
    elif level == 2:
        entries = []
        for path in sorted(glob.glob(join(container_path, '*', '*'))):
            if isdir(path):  # ignore stray files at depth 2
                parent, leaf = os.path.split(path)
                entries.append((os.path.basename(parent), leaf))
    else:
        raise MyError('Select Between Level 1 and Level 2')

    if categories is not None:
        if level == 1 or (level == 2 and cat == 2):
            # `categories` lists the label-folder names themselves.
            entries = [(p, f) for p, f in entries if f in categories]
        elif level == 2 and cat == 1:
            # `categories` lists level-1 names; leaves follow the
            # '<level1>.<rest>' naming convention.
            entries = [(p, f) for p, f in entries
                       if f.split('.')[0] in categories]

    if exclude is not None:
        entries = [(p, f) for p, f in entries if f not in exclude]

    if OneVsAll is None:
        for label, (parent, folder) in enumerate(entries):
            target_names.append(folder)
            if parent is None:
                folder_path = join(container_path, folder)
            else:
                folder_path = join(container_path, parent, folder)
            documents = [join(root, name)
                         for root, _dirs, files in sorted(os.walk(folder_path))
                         for name in files]
            target.extend(len(documents) * [label])
            filenames.extend(documents)
    else:
        if level != 1:
            # Documented restriction; the old code silently returned an
            # empty dataset for OneVsAll with level 2.
            raise MyError('OneVsAll is only supported for level 1')
        if len(OneVsAll) != 1:
            # The old code built this error but forgot to raise it.
            raise MyError('OneVsAll can only be list of size 1')
        positive = OneVsAll[0]
        target_names = [positive, 'Non-' + positive]
        for _, folder in entries:
            folder_path = join(container_path, folder)
            documents = [join(root, name)
                         for root, _dirs, files in sorted(os.walk(folder_path))
                         for name in files]
            # 0 for the positive class, 1 for everything else.
            label = 0 if folder == positive else 1
            target.extend(len(documents) * [label])
            filenames.extend(documents)

    # Convert to arrays for fancy indexing during the optional shuffle.
    filenames = np.array(filenames)
    target = np.array(target)

    if shuffle:
        random_state = check_random_state(random_state)
        indices = np.arange(filenames.shape[0])
        random_state.shuffle(indices)
        filenames = filenames[indices]
        target = target[indices]

    if not load_content:
        return Bunch(filenames=filenames,
                     target_names=target_names,
                     target=target,
                     DESCR=description)

    data = []
    for filename in filenames:
        with open(filename, 'rb') as f:
            data.append(f.read())
    if encoding is not None:
        data = [d.decode(encoding, decode_error) for d in data]
    return Bunch(data=data,
                 filenames=filenames,
                 target_names=target_names,
                 target=target,
                 DESCR=description)